Disable interrupts before exiting to guest context.
Also sprinkle in some assertions about interrupt-enable status.
Signed-off-by: Keir Fraser <keir@xensource.com>
/* %edx == trap_bounce, %ebx == struct vcpu */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
create_bounce_frame:
+ ASSERT_INTERRUPTS_ENABLED
movl UREGS_eflags+4(%esp),%ecx
movb UREGS_cs+4(%esp),%cl
testl $(2|X86_EFLAGS_VM),%ecx
/* %rbx: struct vcpu, interrupts disabled */
/* Restore the full saved register state and return to a compat guest. */
compat_restore_all_guest:
+ ASSERT_INTERRUPTS_DISABLED
RESTORE_ALL
addq $8,%rsp
.Lft0: iretq /* .Lft0: presumably referenced by a fault-fixup entry not visible in this hunk */
/* int80 fast path for compat guests: build the guest bounce frame, then */
/* leave via compat_test_all_events rather than jumping straight to the  */
/* restore path, so pending events are re-checked and interrupts are     */
/* disabled before the final exit to guest context (see commit message). */
ENTRY(compat_int80_direct_trap)
call compat_create_bounce_frame
- jmp compat_restore_all_guest
+ jmp compat_test_all_events
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/* {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]} */
/* %rdx: trap_bounce, %rbx: struct vcpu */
/* On return only %rbx is guaranteed non-clobbered. */
compat_create_bounce_frame:
+ ASSERT_INTERRUPTS_ENABLED
mov %fs,%edi
testb $2,UREGS_cs+8(%rsp)
jz 1f
/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
+ ASSERT_INTERRUPTS_DISABLED
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
jz iret_exit_to_guest
/* Check that the callback is non-null. */
leaq VCPU_int80_bounce(%rbx),%rdx
- cmp $0, TRAPBOUNCE_flags(%rdx)
+ cmp $0,TRAPBOUNCE_flags(%rdx)
jz int80_slow_path
movq VCPU_domain(%rbx),%rax
jnz compat_int80_direct_trap
call create_bounce_frame
- jmp restore_all_guest
+ jmp test_all_events
int80_slow_path:
/*
/* %rdx: trap_bounce, %rbx: struct vcpu */
/* On return only %rbx is guaranteed non-clobbered. */
create_bounce_frame:
+ ASSERT_INTERRUPTS_ENABLED
testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
jnz 1f
/* Push new frame at registered guest-OS stack base. */
regs->r9, regs->r10, regs->r11);
printk("r12: %016lx r13: %016lx r14: %016lx\n",
regs->r12, regs->r13, regs->r14);
- printk("r15: %016lx\n", regs->r15);
+ printk("r15: %016lx cs: %016lx ss: %016lx\n",
+ regs->r15, (long)regs->cs, (long)regs->ss);
show_stack_overflow(cpu, regs->rsp);
panic("DOUBLE FAULT -- system shutdown\n");
idt_table[TRAP_double_fault].a |= 1UL << 32; /* IST1 */
idt_table[TRAP_nmi].a |= 2UL << 32; /* IST2 */
-#ifdef CONFIG_COMPAT
- /* The hypercall entry vector is only accessible from ring 1. */
+ /*
+ * The 32-on-64 hypercall entry vector is only accessible from ring 1.
+ * Also note that this is a trap gate, not an interrupt gate.
+ */
_set_gate(idt_table+HYPERCALL_VECTOR, 15, 1, &compat_hypercall);
+
+ /* Fast trap for int80 (faster than taking the #GP-fixup path). */
_set_gate(idt_table+0x80, 15, 3, &int80_direct_trap);
-#endif
}
stack_bottom = (char *)get_stack_bottom();
((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */ \
((sel) == (!IS_COMPAT(d) ? \
FLAT_KERNEL_CS : /* Xen default seg? */ \
- FLAT_COMPAT_KERNEL_CS)) || /* Xen default compat seg? */ \
+ FLAT_COMPAT_KERNEL_CS)) || \
((sel) & 4)) /* LDT seg? */
#endif /* __ASSEMBLY__ */
#define SETUP_EXCEPTION_FRAME_POINTER \
movl %esp,%ebp; \
notl %ebp
+/*
+ * ASSERT_INTERRUPT_STATUS(x): execute ud2a (invalid opcode -> trap) unless
+ * condition 'x' holds for EFLAGS.IF.  Pushes EFLAGS, tests IF (bit 9) via
+ * byte 1 of the saved flags word (X86_EFLAGS_IF>>8), then pops the 4-byte
+ * flags word back off the stack.  Flags themselves are clobbered by testb.
+ * Use via ASSERT_INTERRUPTS_ENABLED / ASSERT_INTERRUPTS_DISABLED below.
+ */
+#define ASSERT_INTERRUPT_STATUS(x) \
+ pushf; \
+ testb $X86_EFLAGS_IF>>8,1(%esp); \
+ j##x 1f; \
+ ud2a; \
+1: addl $4,%esp;
#else
#define SETUP_EXCEPTION_FRAME_POINTER
+/* Assertion compiled away in this configuration (enclosing #if not visible here). */
+#define ASSERT_INTERRUPT_STATUS(x)
#endif
+/* nz: IF must be set; z: IF must be clear, at the point of the assertion. */
+#define ASSERT_INTERRUPTS_ENABLED ASSERT_INTERRUPT_STATUS(nz)
+#define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)
+
#define __SAVE_ALL_PRE \
cld; \
pushl %eax; \
#define SETUP_EXCEPTION_FRAME_POINTER \
movq %rsp,%rbp; \
notq %rbp
+/*
+ * ASSERT_INTERRUPT_STATUS(x): 64-bit variant -- execute ud2a (invalid
+ * opcode -> trap) unless condition 'x' holds for RFLAGS.IF.  Pushes the
+ * flags, tests IF (bit 9) via byte 1 of the saved word (X86_EFLAGS_IF>>8),
+ * then pops the 8-byte flags word back off the stack.  Flags are
+ * clobbered by testb.  Use via the ENABLED/DISABLED wrappers below.
+ */
+#define ASSERT_INTERRUPT_STATUS(x) \
+ pushf; \
+ testb $X86_EFLAGS_IF>>8,1(%rsp); \
+ j##x 1f; \
+ ud2a; \
+1: addq $8,%rsp;
#else
#define SETUP_EXCEPTION_FRAME_POINTER
+/* Assertion compiled away in this configuration (enclosing #if not visible here). */
+#define ASSERT_INTERRUPT_STATUS(x)
#endif
+/* nz: IF must be set; z: IF must be clear, at the point of the assertion. */
+#define ASSERT_INTERRUPTS_ENABLED ASSERT_INTERRUPT_STATUS(nz)
+#define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)
+
#define SAVE_ALL \
cld; \
pushq %rdi; \